PyObject *kwds)
{
unsigned int clear = 0, index = 0, incremental = 0;
- char _str[32768], *str = _str;
- unsigned int count = 32768;
+ unsigned int count = 16384 + 1, size = count;
+ char *str = malloc(size), *ptr;
+ PyObject *obj;
int ret;
static char *kwd_list[] = { "clear", "index", "incremental", NULL };
if ( !PyArg_ParseTupleAndKeywords(args, kwds, "|iii", kwd_list,
- &clear, &index, &incremental) )
+ &clear, &index, &incremental) ||
+ !str )
return NULL;
ret = xc_readconsolering(self->xc_handle, &str, &count, clear,
                         incremental, &index);
if ( ret < 0 )
return pyxc_error_to_exception();
- return PyString_FromStringAndSize(str, count);
+ while ( !incremental && count == size )
+ {
+ size += count - 1;
+ if ( size < count )
+ break;
+
+ ptr = realloc(str, size);
+ if ( !ptr )
+ break;
+
+ str = ptr + count;
+ count = size - count;
+ ret = xc_readconsolering(self->xc_handle, &str, &count, clear,
+ 1, &index);
+ if ( ret < 0 )
+ break;
+
+ count += str - ptr;
+ str = ptr;
+ }
+
+ obj = PyString_FromStringAndSize(str, count);
+ free(str);
+ return obj;
}
.sbss : AT(ADDR(.sbss) - LOAD_OFFSET)
{ *(.sbss) *(.scommon) }
.bss : AT(ADDR(.bss) - LOAD_OFFSET)
- { *(.bss) *(COMMON) }
+ {
+ . = ALIGN(PAGE_SIZE);
+ *(.bss.page_aligned)
+ *(.bss)
+ *(COMMON)
+ }
_end = .;
}
cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<<bits)-1);
phys_proc_id[cpu] >>= bits;
- printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
- cpu, c->x86_max_cores, cpu_core_id[cpu]);
+ if (opt_cpu_info)
+ printk("CPU %d(%d) -> Core %d\n",
+ cpu, c->x86_max_cores, cpu_core_id[cpu]);
}
#endif
integer_param("cachesize", cachesize_override);
+int __cpuinitdata opt_cpu_info;
+boolean_param("cpuinfo", opt_cpu_info);
+
int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
unsigned int *v;
if (n >= 0x80000005) {
cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
- printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
- edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
+ if (opt_cpu_info)
+ printk("CPU: L1 I cache %dK (%d bytes/line),"
+ " D cache %dK (%d bytes/line)\n",
+ edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
c->x86_cache_size=(ecx>>24)+(edx>>24);
}
c->x86_cache_size = l2size;
- printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
- l2size, ecx & 0xFF);
+ if (opt_cpu_info)
+ printk("CPU: L2 Cache: %dK (%d bytes/line)\n",
+ l2size, ecx & 0xFF);
}
/* Naming convention should be: <Name> [(<Codename>)] */
index_msb = get_count_order(c->x86_num_siblings);
phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
- printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
- phys_proc_id[cpu]);
+ if (opt_cpu_info)
+ printk("CPU: Physical Processor ID: %d\n",
+ phys_proc_id[cpu]);
c->x86_num_siblings = c->x86_num_siblings / c->x86_max_cores;
cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
((1 << core_bits) - 1);
- if (c->x86_max_cores > 1)
- printk(KERN_INFO "CPU: Processor Core ID: %d\n",
+ if (opt_cpu_info && c->x86_max_cores > 1)
+ printk("CPU: Processor Core ID: %d\n",
cpu_core_id[cpu]);
}
}
#endif
-void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
+void __cpuinit print_cpu_info(unsigned int cpu)
{
- char *vendor = NULL;
+ const struct cpuinfo_x86 *c = cpu_data + cpu;
+ const char *vendor = NULL;
+
+ if (!opt_cpu_info)
+ return;
+
+ printk("CPU%u: ", cpu);
if (c->x86_vendor < X86_VENDOR_NUM)
vendor = this_cpu->c_vendor;
printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
for (;;) local_irq_enable();
}
- printk(KERN_INFO "Initializing CPU#%d\n", cpu);
+ if (opt_cpu_info)
+ printk("Initializing CPU#%d\n", cpu);
if (cpu_has_pat)
wrmsrl(MSR_IA32_CR_PAT, host_pat);
l3 = new_l3;
}
- if (trace)
- printk (KERN_INFO "CPU: Trace cache: %dK uops", trace);
- else if ( l1i )
- printk (KERN_INFO "CPU: L1 I cache: %dK", l1i);
-
- if (l1d)
- printk(", L1 D cache: %dK\n", l1d);
- else
- printk("\n");
-
- if (l2)
- printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);
-
- if (l3)
- printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);
+ if (opt_cpu_info) {
+ if (trace)
+ printk("CPU: Trace cache: %dK uops", trace);
+ else if ( l1i )
+ printk("CPU: L1 I cache: %dK", l1i);
+
+ if (l1d)
+ printk(", L1 D cache: %dK\n", l1d);
+ else
+ printk("\n");
+
+ if (l2)
+ printk("CPU: L2 cache: %dK\n", l2);
+
+ if (l3)
+ printk("CPU: L3 cache: %dK\n", l3);
+ }
c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
}
/* AMD Family10 machine check */
-int amd_f10_mcheck_init(struct cpuinfo_x86 *c)
+enum mcheck_type amd_f10_mcheck_init(struct cpuinfo_x86 *c)
{
- if (!amd_k8_mcheck_init(c))
- return 0;
+ if (amd_k8_mcheck_init(c) == mcheck_none)
+ return mcheck_none;
x86_mce_callback_register(amd_f10_handler);
- printk("CPU%i: AMD Family%xh machine check reporting enabled\n",
- smp_processor_id(), c->x86);
-
- return 1;
+ return mcheck_amd_famXX;
}
}
/* AMD K8 machine check */
-int amd_k8_mcheck_init(struct cpuinfo_x86 *c)
+enum mcheck_type amd_k8_mcheck_init(struct cpuinfo_x86 *c)
{
uint32_t i;
enum mcequirk_amd_flags quirkflag;
/* Check for PPro style MCA; our caller has confirmed MCE support. */
if (!cpu_has(c, X86_FEATURE_MCA))
- return 0;
+ return mcheck_none;
quirkflag = mcequirk_lookup_amd_quirkdata(c);
}
set_in_cr4(X86_CR4_MCE);
- if (c->x86 < 0x10 || c->x86 > 0x11)
- printk("CPU%i: AMD K8 machine check reporting enabled\n",
- smp_processor_id());
- return 1;
+ return mcheck_amd_k8;
}
/* AMD K7 machine check */
-int amd_k7_mcheck_init(struct cpuinfo_x86 *c)
+enum mcheck_type amd_k7_mcheck_init(struct cpuinfo_x86 *c)
{
u32 l, h;
int i;
/* Check for PPro style MCA; our caller has confirmed MCE support. */
if (!cpu_has(c, X86_FEATURE_MCA))
- return 0;
+ return mcheck_none;
x86_mce_vector_register(k7_machine_check);
}
set_in_cr4 (X86_CR4_MCE);
- printk (KERN_INFO "CPU%d: AMD K7 machine check reporting enabled.\n",
- smp_processor_id());
- return 1;
+ return mcheck_amd_k7;
}
}
}
-static int amd_mcheck_init(struct cpuinfo_x86 *ci)
+static enum mcheck_type amd_mcheck_init(struct cpuinfo_x86 *ci)
{
- int rc = 0;
+ enum mcheck_type rc = mcheck_none;
switch (ci->x86) {
case 6:
/* This has to be run for each processor */
void mcheck_init(struct cpuinfo_x86 *c)
{
- int inited = 0, i, broadcast;
+ int i, broadcast;
+ enum mcheck_type inited = mcheck_none;
+ static enum mcheck_type g_type = mcheck_unset;
static int broadcast_check;
if (mce_disabled == 1) {
if (g_mcg_cap & MCG_CTL_P)
rdmsrl(MSR_IA32_MCG_CTL, h_mcg_ctl);
set_poll_bankmask(c);
- if (!inited)
- printk(XENLOG_INFO "CPU%i: No machine check initialization\n",
- smp_processor_id());
+
+ if (inited != g_type) {
+ char prefix[20];
+ static const char *const type_str[] = {
+ [mcheck_amd_famXX] = "AMD",
+ [mcheck_amd_k7] = "AMD K7",
+ [mcheck_amd_k8] = "AMD K8",
+ [mcheck_intel] = "Intel"
+ };
+
+ snprintf(prefix, ARRAY_SIZE(prefix),
+ g_type != mcheck_unset ? XENLOG_WARNING "CPU%i: "
+ : XENLOG_INFO,
+ smp_processor_id());
+ BUG_ON(inited >= ARRAY_SIZE(type_str));
+ switch (inited) {
+ default:
+ printk("%s%s machine check reporting enabled\n",
+ prefix, type_str[inited]);
+ break;
+ case mcheck_amd_famXX:
+ printk("%s%s Fam%xh machine check reporting enabled\n",
+ prefix, type_str[inited], c->x86);
+ break;
+ case mcheck_none:
+ printk("%sNo machine check initialization\n", prefix);
+ break;
+ }
+
+ g_type = inited;
+ }
}
u64 mce_cap_init(void)
printk(s, ##a); \
} while (0)
+enum mcheck_type {
+ mcheck_unset = -1,
+ mcheck_none,
+ mcheck_amd_famXX,
+ mcheck_amd_k7,
+ mcheck_amd_k8,
+ mcheck_intel
+};
/* Init functions */
-int amd_k7_mcheck_init(struct cpuinfo_x86 *c);
-int amd_k8_mcheck_init(struct cpuinfo_x86 *c);
-int amd_f10_mcheck_init(struct cpuinfo_x86 *c);
+enum mcheck_type amd_k7_mcheck_init(struct cpuinfo_x86 *c);
+enum mcheck_type amd_k8_mcheck_init(struct cpuinfo_x86 *c);
+enum mcheck_type amd_f10_mcheck_init(struct cpuinfo_x86 *c);
-int intel_mcheck_init(struct cpuinfo_x86 *c);
+enum mcheck_type intel_mcheck_init(struct cpuinfo_x86 *c);
void intel_mcheck_timer(struct cpuinfo_x86 *c);
void mce_intel_feature_init(struct cpuinfo_x86 *c);
l = apic_read (APIC_LVTTHMR);
apic_write_around (APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
- printk (KERN_INFO "CPU%d: Thermal monitoring enabled (%s)\n",
- cpu, tm2 ? "TM2" : "TM1");
+ if (opt_cpu_info)
+ printk(KERN_INFO "CPU%u: Thermal monitoring enabled (%s)\n",
+ cpu, tm2 ? "TM2" : "TM1");
return;
}
#endif /* CONFIG_X86_MCE_THERMAL */
int cpu = smp_processor_id();
if (!mce_available(c) || !cmci_support) {
- mce_printk(MCE_QUIET, "CMCI: CPU%d has no CMCI support\n", cpu);
+ if (opt_cpu_info)
+ mce_printk(MCE_QUIET, "CMCI: CPU%d has no CMCI support\n", cpu);
return;
}
}
/* p4/p6 family have similar MCA initialization process */
-int intel_mcheck_init(struct cpuinfo_x86 *c)
+enum mcheck_type intel_mcheck_init(struct cpuinfo_x86 *c)
{
_mce_cap_init(c);
- mce_printk(MCE_QUIET, "Intel machine check reporting enabled on CPU#%d.\n",
- smp_processor_id());
/* machine check is available */
x86_mce_vector_register(intel_machine_check);
mce_set_owner();
open_softirq(MACHINE_CHECK_SOFTIRQ, mce_softirq);
- return 1;
+ return mcheck_intel;
}
int intel_mce_wrmsr(uint32_t msr, uint64_t val)
void hvm_asid_init(int nasids)
{
+ static s8 g_disabled = -1;
struct hvm_asid_data *data = &this_cpu(hvm_asid_data);
/*
data->max_asid = nasids - 1;
data->disabled = (nasids <= 1);
- printk("HVM: ASIDs %s \n",
- (data->disabled ? "disabled." : "enabled."));
+ if ( g_disabled != data->disabled )
+ {
+ printk("HVM: ASIDs %sabled.\n", data->disabled ? "dis" : "en");
+ if ( g_disabled < 0 )
+ g_disabled = data->disabled;
+ }
/* Zero indicates 'invalid generation', so we start the count at one. */
data->core_asid_generation = 1;
node = 0;
numa_set_node(cpu, node);
- if ( acpi_numa > 0 )
- printk(KERN_INFO "CPU %d APIC %d -> Node %d\n", cpu, apicid, node);
+ if ( opt_cpu_info && acpi_numa > 0 )
+ printk("CPU %d APIC %d -> Node %d\n", cpu, apicid, node);
}
/*
start_eip = setup_trampoline();
/* So we see what's up */
- printk("Booting processor %d/%d eip %lx\n", cpu, apicid, start_eip);
+ if (opt_cpu_info)
+ printk("Booting processor %d/%d eip %lx\n",
+ cpu, apicid, start_eip);
stack_start.esp = prepare_idle_stack(cpu);
if (cpu_isset(cpu, cpu_callin_map)) {
/* number CPUs logically, starting from 1 (BSP is 0) */
Dprintk("OK.\n");
- printk("CPU%d: ", cpu);
- print_cpu_info(&cpu_data[cpu]);
+ print_cpu_info(cpu);
Dprintk("CPU has booted.\n");
} else {
boot_error = 1;
* Setup boot CPU information
*/
smp_store_cpu_info(0); /* Final full version of the data */
- printk("CPU%d: ", 0);
- print_cpu_info(&cpu_data[0]);
+ print_cpu_info(0);
boot_cpu_physical_apicid = get_apic_id();
x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;
#define _CONRING_SIZE 16384
#define CONRING_IDX_MASK(i) ((i)&(conring_size-1))
-static char _conring[_CONRING_SIZE], *__read_mostly conring = _conring;
+static char
+#if _CONRING_SIZE >= PAGE_SIZE
+ __attribute__((__section__(".bss.page_aligned"), __aligned__(PAGE_SIZE)))
+#endif
+ _conring[_CONRING_SIZE];
+static char *__read_mostly conring = _conring;
static uint32_t __read_mostly conring_size = _CONRING_SIZE;
static uint32_t conringc, conringp;
serial_init_postirq();
+ if ( !opt_conring_size )
+ opt_conring_size = num_present_cpus() << (9 + xenlog_lower_thresh);
/* Round size down to a power of two. */
while ( opt_conring_size & (opt_conring_size - 1) )
opt_conring_size &= opt_conring_size - 1;
spin_unlock_irq(&console_lock);
printk("Allocated console ring of %u KiB.\n", opt_conring_size >> 10);
+
+ init_xenheap_pages(__pa(_conring), __pa(_conring + _CONRING_SIZE));
}
void __init console_endboot(void)
extern u64 host_pat;
extern int phys_proc_id[NR_CPUS];
extern int cpu_core_id[NR_CPUS];
+extern int opt_cpu_info;
extern void identify_cpu(struct cpuinfo_x86 *);
extern void setup_clear_cpu_cap(unsigned int);
-extern void print_cpu_info(struct cpuinfo_x86 *);
+extern void print_cpu_info(unsigned int cpu);
extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
extern void dodgy_tsc(void);